{
/* Need a vcpu for calling unpins; for now, since we don't have
* per-vcpu shadows, any will do */
- struct vcpu *v = d->vcpu[0];
+ struct vcpu *v, *v2;
struct list_head *l, *t;
struct page_info *pg;
+ cpumask_t flushmask = CPU_MASK_NONE;
mfn_t smfn;
if ( chunk_is_available(d, order) ) return;
+ v = current;
+ if ( v->domain != d )
+ v = d->vcpu[0];
+
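The vcpu is only needed as a handle for the unpin and unhook calls below; hoisting the choice of v above stage one lets both stages share the same rule. As a minimal sketch, the selection could be factored into a helper (hypothetical name, not part of this patch):

    /* Hypothetical helper: any vcpu of d will do for unpinning, but
     * prefer current when it already belongs to d. */
    static inline struct vcpu *shadow_prealloc_vcpu(struct domain *d)
    {
        struct vcpu *v = current;
        return (v->domain == d) ? v : d->vcpu[0];
    }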
/* Stage one: walk the list of top-level pages, unpinning them */
perfc_incrc(shadow_prealloc_1);
list_for_each_backwards_safe(l, t, &d->arch.shadow.toplevel_shadows)
/* Stage two: all shadow pages are in use in hierarchies that are
 * loaded in cr3 on some vcpu. Walk them, unhooking the non-Xen
 * mappings. */
perfc_incrc(shadow_prealloc_2);
- v = current;
- if ( v->domain != d )
- v = d->vcpu[0];
- /* Walk the list from the tail: recently used toplevels have been pulled
- * to the head */
list_for_each_backwards_safe(l, t, &d->arch.shadow.toplevel_shadows)
{
pg = list_entry(l, struct page_info, list);
smfn = page_to_mfn(pg);
shadow_unhook_mappings(v, smfn);
- /* Need to flush TLB if we've altered our own tables */
- if ( !shadow_mode_external(d) &&
- (pagetable_get_pfn(current->arch.shadow_table[0]) == mfn_x(smfn)
- || pagetable_get_pfn(current->arch.shadow_table[1]) == mfn_x(smfn)
- || pagetable_get_pfn(current->arch.shadow_table[2]) == mfn_x(smfn)
- || pagetable_get_pfn(current->arch.shadow_table[3]) == mfn_x(smfn)
- ) )
- local_flush_tlb();
-
+ /* Remember to flush TLBs: we have removed shadow entries that
+ * were in use by some vcpu(s). */
+ for_each_vcpu(d, v2)
+ {
+ if ( pagetable_get_pfn(v2->arch.shadow_table[0]) == mfn_x(smfn)
+ || pagetable_get_pfn(v2->arch.shadow_table[1]) == mfn_x(smfn)
+ || pagetable_get_pfn(v2->arch.shadow_table[2]) == mfn_x(smfn)
+ || pagetable_get_pfn(v2->arch.shadow_table[3]) == mfn_x(smfn)
+ )
+ cpus_or(flushmask, v2->vcpu_dirty_cpumask, flushmask);
+ }
+
/* See if that freed up a chunk of appropriate size */
- if ( chunk_is_available(d, order) ) return;
+ if ( chunk_is_available(d, order) )
+ {
+ flush_tlb_mask(flushmask);
+ return;
+ }
}
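The removed local_flush_tlb() only covered the case where the unhooked shadow was in current's own cr3; with multiple vcpus, other CPUs can hold stale entries too. The loop above instead ORs each affected vcpu's dirty cpumask into flushmask and defers to a single flush_tlb_mask() call. A standalone sketch of this accumulate-then-flush pattern, with a plain bitmask and stub types standing in for Xen's cpumask_t, vcpu and flush_tlb_mask() (all names illustrative):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t cpumask_t;          /* stub for Xen's cpumask_t */

    static void flush_tlb_mask(cpumask_t mask)
    {
        /* Xen would IPI every CPU set in mask to flush its TLB. */
        printf("flushing TLBs on cpus %#llx\n", (unsigned long long)mask);
    }

    struct vcpu_stub {
        cpumask_t vcpu_dirty_cpumask;    /* CPUs this vcpu has run on */
        int uses_shadow;                 /* cr3 pointed at the unhooked shadow? */
    };

    int main(void)
    {
        struct vcpu_stub vcpus[] = {
            { 0x1, 1 },                  /* cpu0, affected */
            { 0x6, 0 },                  /* cpus 1-2, unaffected */
            { 0x8, 1 },                  /* cpu3, affected */
        };
        cpumask_t flushmask = 0;         /* CPU_MASK_NONE */

        /* Accumulate instead of flushing per-shadow: one IPI round
         * at the end covers every CPU that might hold stale entries. */
        for (unsigned i = 0; i < sizeof vcpus / sizeof vcpus[0]; i++)
            if (vcpus[i].uses_shadow)
                flushmask |= vcpus[i].vcpu_dirty_cpumask;

        flush_tlb_mask(flushmask);       /* flushes cpus 0 and 3 */
        return 0;
    }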
/* Nothing more we can do: all remaining shadows are of pages that
 * hold Xen mappings for some vcpu. This can never happen. */
if ( all )
domain_crash(v->domain);
}
+
+ /* Need to flush TLBs now, so that linear maps are safe next time we
+ * take a fault. */
+ flush_tlb_mask(v->domain->domain_dirty_cpumask);
}
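Here the flush uses the domain-wide dirty mask rather than the targeted flushmask built in shadow_prealloc: after an arbitrary unshadow, any vcpu's linear pagetable mappings may be stale, so every CPU the domain has run on must be covered. Conceptually the domain mask is the union of the per-vcpu masks; a sketch of that relation under the same stub types as above (an assumption about how the masks relate, not Xen's actual definition):

    #include <stdint.h>

    typedef uint64_t cpumask_t;                     /* stub, as above */

    struct vcpu_stub { cpumask_t vcpu_dirty_cpumask; };

    /* Assumed relation: the domain-wide dirty mask covers every CPU
     * that any of the domain's vcpus may have left TLB state on. */
    static cpumask_t domain_dirty_mask(const struct vcpu_stub *v, unsigned nr)
    {
        cpumask_t m = 0;                            /* CPU_MASK_NONE */
        while (nr--)
            m |= v[nr].vcpu_dirty_cpumask;
        return m;
    }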
void
if ( v->arch.shadow.last_emulated_mfn == mfn_x(gmfn) &&
     sh_mfn_is_a_page_table(gmfn) )
{
u32 flags = mfn_to_page(gmfn)->shadow_flags;
- mfn_t smfn;
if ( !(flags & (SHF_L2_32|SHF_L2_PAE|SHF_L2H_PAE|SHF_L4_64)) )
{
perfc_incrc(shadow_early_unshadow);
sh_remove_shadows(v, gmfn, 1, 0 /* Fast, can fail to unshadow */ );
- return;
- }
- /* SHF_unhooked_mappings is set to make sure we only unhook
- * once in a single batch of updates. It is reset when this
- * top-level page is loaded into CR3 again */
- if ( !(flags & SHF_unhooked_mappings) )
- {
- perfc_incrc(shadow_early_unshadow_top);
- mfn_to_page(gmfn)->shadow_flags |= SHF_unhooked_mappings;
- if ( flags & SHF_L2_32 )
- {
- smfn = get_shadow_status(v, gmfn, PGC_SH_l2_32_shadow);
- shadow_unhook_mappings(v, smfn);
- }
- if ( flags & SHF_L2_PAE )
- {
- smfn = get_shadow_status(v, gmfn, PGC_SH_l2_pae_shadow);
- shadow_unhook_mappings(v, smfn);
- }
- if ( flags & SHF_L2H_PAE )
- {
- smfn = get_shadow_status(v, gmfn, PGC_SH_l2h_pae_shadow);
- shadow_unhook_mappings(v, smfn);
- }
- if ( flags & SHF_L4_64 )
- {
- smfn = get_shadow_status(v, gmfn, PGC_SH_l4_64_shadow);
- shadow_unhook_mappings(v, smfn);
- }
- }
+ }
}
v->arch.shadow.last_emulated_mfn = mfn_x(gmfn);
#endif
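Assembled from the context and '+' lines above, the early-unshadow heuristic after this patch reduces to the following: a repeated emulated write to the same mfn unshadows it only when it is not a top-level pagetable, since the SHF_unhooked_mappings unhook path is gone.

    if ( v->arch.shadow.last_emulated_mfn == mfn_x(gmfn) &&
         sh_mfn_is_a_page_table(gmfn) )
    {
        u32 flags = mfn_to_page(gmfn)->shadow_flags;
        /* Top-level shadows (L2/L4) are now left alone; only lower-level
         * pagetables are speculatively unshadowed on a repeated write. */
        if ( !(flags & (SHF_L2_32|SHF_L2_PAE|SHF_L2H_PAE|SHF_L4_64)) )
        {
            perfc_incrc(shadow_early_unshadow);
            sh_remove_shadows(v, gmfn, 1, 0 /* Fast, can fail to unshadow */ );
        }
    }
    v->arch.shadow.last_emulated_mfn = mfn_x(gmfn);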
PERFCOUNTER_CPU(shadow_mappings, "shadow removes all mappings")
PERFCOUNTER_CPU(shadow_mappings_bf, "shadow rm-mappings brute-force")
PERFCOUNTER_CPU(shadow_early_unshadow, "shadow unshadows for fork/exit")
-PERFCOUNTER_CPU(shadow_early_unshadow_top, "shadow unhooks for fork/exit")
PERFCOUNTER_CPU(shadow_unshadow, "shadow unshadows a page")
PERFCOUNTER_CPU(shadow_up_pointer, "shadow unshadow by up-pointer")
PERFCOUNTER_CPU(shadow_unshadow_bf, "shadow unshadow brute-force")